1 Data import

The data is included within this reproducible report and can be downloaded here (object rbt) and here (object rbt_sci_1).

1.1 Data wrangling

# Invert a rating-scale item (1 --> 7, 2 --> 6, ... on the default 7-point
# scale). `x` may be numeric or factor-coded and is coerced via as.numeric().
# `scale_max` generalizes the previously hard-coded 7-point width; the
# default reproduces the original behavior (8 - x) exactly.
inv7fct <- function(x, scale_max = 7) (scale_max + 1) - as.numeric(x)

# pivot them into long format
# Reshape the raw questionnaire data to long format (one row per
# session x item). The item prefix "abs1_"/"abs2_" encodes which title
# page an item belonged to; it determines the treatment and is then
# stripped from the item name.
rbt_sci_l <- rbt %>%
    pivot_longer(cols = (abs1_tsm_1:abs2_tch_5),
                 names_to = "variable",
                 values_to = "value") %>%
    mutate(
        # map the abstract an item belongs to onto its treatment code
        treat = case_when(
            str_detect(variable, "abs1_") ~ toupper(treat1),
            str_detect(variable, "abs2_") ~ toupper(treat2)),
        # drop the 5-character title-page prefix ("abs1_" / "abs2_")
        variable = str_sub(variable, 6, -1))

# Back to wide format: one row per session x treatment, one column per item.
rbt_sci_w <- rbt_sci_l %>%
    pivot_wider(id_cols = c(session, treat),
                names_from = variable,
                values_from = value)

# Invert the trust items, shorten their names, add a measurement-repetition
# index per session and join the demographic data.
data_rbt <- rbt_sci_w %>%
    # recode 1 --> 7, 2 --> 6, ...; across() replaces the superseded
    # mutate_at()/list(~ .) idiom
    mutate(across(tru_exp_1:tru_ben_4, inv7fct)) %>%
    # drop the "tru_" prefix (tru_exp_1 -> exp_1, ...) instead of 14
    # individual rename() pairs
    rename_with(~ str_remove(.x, "^tru_"), tru_exp_1:tru_ben_4) %>%
    # meas_rep: 1 = first, 2 = second measurement within each session
    group_by(session) %>%
    mutate(meas_rep = row_number()) %>%
    ungroup() %>%
    # join key made explicit instead of relying on the automatic
    # "Joining, by = ..." detection
    full_join(rbt_sci_1 %>%
                  select(session, age, sex, position, position_oth) %>%
                  distinct(),
              by = "session")
## Joining, by = "session"
# data frames with sum scales
# Mean scores per scale (trust dimensions and topic-specific multiplism),
# computed row-wise with missing item responses ignored.
# NOTE(review): the column name "Experitse" is a typo for "Expertise" but is
# kept because downstream code refers to it.
data_scales <- data_rbt %>%
  mutate(Treatment = factor(treat, levels = c("GB", "CC", "CB")),
         Experitse = rowMeans(cbind(exp_1, exp_2, exp_3,
                                    exp_4, exp_5, exp_6), na.rm = TRUE),
         Integrity = rowMeans(cbind(int_1, int_2, int_3, int_4),
                              na.rm = TRUE),
         Benevolence = rowMeans(cbind(ben_1, ben_2, ben_3, ben_4),
                                na.rm = TRUE),
         TSM = rowMeans(cbind(tsm_1, tsm_2, tsm_3, tsm_4), na.rm = TRUE))

# Same mean scales as data_scales, but with publication-ready treatment
# labels and a spelled-out name for the multiplism scale.
data_scales_lables <- data_rbt %>%
  mutate(Treatment = factor(treat, levels = c("GB", "CC", "CB")),
         Treatment = fct_recode(Treatment,
                                `Colored badges` = "CB",
                                `Control Condition` = "CC",
                                `Greyed out badges` = "GB"),
         Experitse = rowMeans(cbind(exp_1, exp_2, exp_3,
                                    exp_4, exp_5, exp_6), na.rm = TRUE),
         Integrity = rowMeans(cbind(int_1, int_2, int_3, int_4),
                              na.rm = TRUE),
         Benevolence = rowMeans(cbind(ben_1, ben_2, ben_3, ben_4),
                                na.rm = TRUE),
         `Topic specific multiplism` = rowMeans(cbind(tsm_1, tsm_2,
                                                      tsm_3, tsm_4),
                                                na.rm = TRUE))

# One row per session, one column per scale x treatment combination
# (e.g. Integrity_GB). pivot_longer()/pivot_wider() replace the superseded
# gather()/spread(); names_sort = TRUE reproduces spread()'s alphabetical
# column order (row order may differ, which no downstream code relies on).
data_scales_wide <- data_scales %>%
  select(Experitse, Integrity, Benevolence,
         TSM, Treatment, session) %>%
  pivot_longer(cols = -c(session, Treatment),
               names_to = "Variable", values_to = "Value") %>%
  mutate(Variable2 = paste(Variable, Treatment, sep = "_")) %>%
  select(-Variable, -Treatment) %>%
  pivot_wider(names_from = Variable2, values_from = Value,
              names_sort = TRUE)

# Labeled variant of data_scales_wide (columns like
# `Topic specific multiplism_Colored badges`). pivot_longer()/pivot_wider()
# replace the superseded gather()/spread(); names_sort = TRUE reproduces
# spread()'s alphabetical column order.
data_scales_lables_wide <- data_scales_lables %>%
  select(Experitse, Integrity, Benevolence,
         `Topic specific multiplism`, Treatment, session) %>%
  pivot_longer(cols = -c(session, Treatment),
               names_to = "Variable", values_to = "Value") %>%
  mutate(Variable2 = paste(Variable, Treatment, sep = "_")) %>%
  select(-Variable, -Treatment) %>%
  pivot_wider(names_from = Variable2, values_from = Value,
              names_sort = TRUE)

# Data of treatment check
# Treatment-check items plus the variables needed to split and label them.
data_tch <- select(data_rbt, starts_with("tch"), meas_rep, treat)

# Treatment-check data with -999 ("don't know") recoded to NA; rows where
# all four scored items are missing are dropped. across() replaces the
# superseded mutate_all(); if_all() replaces the manual rowSums(is.na())
# count.
data_tch_n <- data_tch %>%
  mutate(across(everything(), ~ ifelse(.x == -999, NA, .x))) %>%
  filter(!if_all(c(tch_1, tch_2, tch_3, tch_4), is.na))

PID_list <- data_rbt$session %>% unique()

2 Sample

2.1 Sample size

length(PID_list)
## [1] 250
# Cross-check: number of distinct sessions with a second measurement
# (should equal length(PID_list)).
data_rbt %>%
  filter(meas_rep == 2) %>%
  distinct(session) %>%
  nrow()
## [1] 250

2.2 Skipped after first measurement

# Number of participants who skipped after the first measurement
# (missing exp_1 at measurement 2).
data_rbt %>%
  filter(meas_rep == 2) %>%
  pull(exp_1) %>%
  is.na() %>%
  sum()
## [1] 0

2.3 Demographics

# Demographic overview at measurement 2 (one row per participant);
# coded variables are treated as factors for the frequency summary.
data_rbt %>%
  filter(meas_rep == 2) %>%
  mutate(across(c(position, sex, age), as.factor)) %>%
  select(age, position, sex) %>%
  skim(.)
Data summary
Name Piped data
Number of rows 250
Number of columns 3
_______________________
Column type frequency:
factor 3
________________________
Group variables None

Variable type: factor

skim_variable n_missing complete_rate ordered n_unique top_counts
age 0 1 FALSE 3 16: 193, 35: 37, 50: 20
position 0 1 FALSE 6 -99: 122, 1: 91, 5: 18, 2: 13
sex 0 1 FALSE 2 f: 170, m: 80

2.3.1 Count on sex

# frequency table of sex at measurement 2 (one row per participant)
data_rbt %>%
  filter(meas_rep == 2)%>%
  pull(sex) %>% 
  table(.)
## .
##   f   m 
## 170  80

2.3.2 Count on education

# frequency table of position at measurement 2
# NOTE(review): -999 appears to code a non-response here — confirm against
# the codebook (the heading calls this "education" but the variable is
# `position`)
data_rbt %>%
  filter(meas_rep == 2)%>%
  pull(position) %>% 
  table(.)
## .
## -999    1    2    3    4    5 
##  122   91   13    5    1   18

2.3.3 Count on age

# frequency table of age at measurement 2
# NOTE(review): only three distinct values (16/35/50) — presumably age-group
# codes rather than years; confirm against the codebook
data_rbt %>%
  filter(meas_rep == 2)%>%
  pull(age) %>% 
  table(.)
## .
##  16  35  50 
## 193  37  20

3 Psychometric properties of the measurements

3.1 Descriptive overview

# Missing-value-tolerant wrappers around DescTools' skewness and kurtosis,
# plus the largest absolute z-value of a vector (univariate outlier check).
skewn <- function(x) {
  DescTools::Skew(x, na.rm = TRUE)
}
kurto <- function(x) {
  DescTools::Kurt(x, na.rm = TRUE)
}
maxabszvalue <- function(x) {
  max(abs(scale(na.omit(x))))
}

my_skim <- skim_with(numeric = sfl(skewn, kurto, maxabszvalue))

data_scales%>%
  my_skim(.)
Data summary
Name Piped data
Number of rows 500
Number of columns 38
_______________________
Column type frequency:
character 4
factor 1
numeric 33
________________________
Group variables None

Variable type: character

skim_variable n_missing complete_rate min max empty n_unique whitespace
session 0 1.00 64 64 0 250 0
treat 0 1.00 2 2 0 3 0
sex 0 1.00 1 1 0 2 0
position_oth 270 0.46 2 97 0 103 0

Variable type: factor

skim_variable n_missing complete_rate ordered n_unique top_counts
Treatment 0 1 FALSE 3 CB: 172, CC: 165, GB: 163

Variable type: numeric

skim_variable n_missing complete_rate mean sd p0 p25 p50 p75 p100 hist skewn kurto maxabszvalue
tsm_1 0 1 2.03 0.83 1.00 1.00 2.00 3.00 4 ▆▇▁▅▁ 0.36 -0.61 2.37
tsm_2 0 1 2.48 0.82 1.00 2.00 2.00 3.00 4 ▂▇▁▆▂ 0.24 -0.51 1.85
tsm_3 0 1 2.27 0.82 1.00 2.00 2.00 3.00 4 ▃▇▁▆▁ 0.15 -0.53 2.12
tsm_4 0 1 2.67 0.80 1.00 2.00 3.00 3.00 4 ▁▇▁▇▃ 0.10 -0.65 2.10
tsc_1 0 1 2.85 0.72 1.00 2.00 3.00 3.00 4 ▁▅▁▇▂ -0.15 -0.32 2.56
tsc_2 0 1 1.92 0.72 1.00 1.00 2.00 2.00 4 ▅▇▁▂▁ 0.40 -0.18 2.87
tsc_3 0 1 2.90 0.70 1.00 2.75 3.00 3.00 4 ▁▃▁▇▂ -0.31 0.03 2.70
exp_1 0 1 5.49 1.29 1.00 5.00 6.00 7.00 7 ▁▁▂▃▇ -0.66 -0.18 3.49
exp_2 0 1 5.56 1.17 1.00 5.00 6.00 6.00 7 ▁▁▂▃▇ -0.63 0.02 3.89
exp_3 0 1 5.66 1.18 1.00 5.00 6.00 7.00 7 ▁▁▂▃▇ -0.73 0.03 3.94
exp_4 0 1 5.47 1.37 1.00 5.00 6.00 7.00 7 ▁▁▂▃▇ -0.74 -0.09 3.27
exp_5 0 1 5.13 1.38 1.00 4.00 5.00 6.00 7 ▁▂▃▅▇ -0.47 -0.45 3.00
exp_6 0 1 5.40 1.28 2.00 5.00 6.00 6.00 7 ▂▅▇▇▇ -0.51 -0.48 2.65
int_1 0 1 5.27 1.18 1.00 4.00 5.00 6.00 7 ▁▁▃▅▇ -0.33 -0.39 3.61
int_2 0 1 5.29 1.30 2.00 4.00 5.00 6.00 7 ▃▆▇▇▆ -0.41 -0.58 2.53
int_3 0 1 5.10 1.20 2.00 4.00 5.00 6.00 7 ▂▇▇▇▃ -0.09 -0.70 2.59
int_4 0 1 5.17 1.31 1.00 4.00 5.00 6.00 7 ▁▁▅▅▇ -0.29 -0.63 3.20
ben_1 0 1 5.10 1.20 2.00 4.00 5.00 6.00 7 ▂▇▇▆▅ -0.05 -0.67 2.58
ben_2 0 1 5.22 1.36 1.00 4.00 5.00 6.00 7 ▁▁▃▃▇ -0.45 -0.39 3.11
ben_3 0 1 5.26 1.32 1.00 4.00 5.00 6.00 7 ▁▁▃▃▇ -0.58 -0.11 3.22
ben_4 0 1 5.02 1.24 1.00 4.00 5.00 6.00 7 ▁▂▆▆▇ -0.08 -0.56 3.24
tch_1 0 1 -111.57 318.64 -999.00 1.00 2.00 4.00 4 ▁▁▁▁▇ -2.42 3.87 2.79
tch_2 0 1 -215.79 413.94 -999.00 1.00 1.50 4.00 4 ▂▁▁▁▇ -1.36 -0.15 1.89
tch_3 0 1 -209.78 409.77 -999.00 1.00 1.00 4.00 4 ▂▁▁▁▇ -1.41 -0.03 1.93
tch_4 0 1 -133.63 343.68 -999.00 1.00 2.00 4.00 4 ▁▁▁▁▇ -2.12 2.49 2.52
tch_5 0 1 -211.79 411.17 -999.00 1.00 1.00 4.00 4 ▂▁▁▁▇ -1.39 -0.07 1.91
meas_rep 0 1 1.50 0.50 1.00 1.00 1.50 2.00 2 ▇▁▁▁▇ 0.00 -2.00 1.00
age 0 1 21.53 10.75 16.00 16.00 16.00 16.00 50 ▇▁▂▁▁ 1.67 1.36 2.65
position 0 1 -486.61 500.74 -999.00 -999.00 1.00 1.00 5 ▇▁▁▁▇ -0.05 -2.00 1.02
Experitse 0 1 5.45 1.15 1.67 4.67 5.67 6.33 7 ▁▂▅▆▇ -0.53 -0.31 3.29
Integrity 0 1 5.21 1.11 2.00 4.50 5.25 6.00 7 ▁▅▇▇▆ -0.24 -0.58 2.90
Benevolence 0 1 5.15 1.13 1.75 4.25 5.25 6.00 7 ▁▃▇▇▇ -0.23 -0.48 3.01
TSM 0 1 2.36 0.58 1.00 2.00 2.25 2.75 4 ▂▅▇▂▁ 0.17 -0.25 2.84

3.2 METI

3.2.1 Dimensionality (CFA)

First we specified consecutive one- and three-dimensional CFA models (one per measurement occasion)

# onedimensional model
# (all 14 METI items load on one factor; the factor is labeled "exp" but
# carries the integrity and benevolence items as well — it serves as the
# baseline against which the three-dimensional model is compared below)
cfa_meti_model_1d <- "exp =~ exp_1 + exp_2 + exp_3 + exp_4 + exp_5 + exp_6 +
                             int_1 + int_2 + int_3 + int_4 +
                             ben_1 + ben_2 + ben_3 + ben_4"

# fit the 1d model separately per measurement occasion
cfa1d_meti_1 <- cfa(cfa_meti_model_1d, data = data_rbt%>%filter(meas_rep == 1))
cfa1d_meti_2 <- cfa(cfa_meti_model_1d, data = data_rbt%>%filter(meas_rep == 2))

# three-dimensional METI models: expertise, integrity, benevolence
# NOTE(review): cfa_meti_model_1 and cfa_meti_model_2 are byte-identical;
# the duplication only mirrors the two measurement occasions they are
# fitted to and could be collapsed into one model string
cfa_meti_model_1 <- "exp =~ exp_1 + exp_2 + exp_3 + exp_4 + exp_5 + exp_6
                     int =~ int_1 + int_2 + int_3 + int_4 
                     ben =~ ben_1 + ben_2 + ben_3 + ben_4"

cfa_meti_model_2 <- "exp =~ exp_1 + exp_2 + exp_3 + exp_4 + exp_5 + exp_6
                     int =~ int_1 + int_2 + int_3 + int_4 
                     ben =~ ben_1 + ben_2 + ben_3 + ben_4"

# fit for the first measurement occasion
cfa_meti_1 <- cfa(cfa_meti_model_1, data = data_rbt%>%filter(meas_rep == 1))

# define a function which prints the fit
fpf <- function(x) {
  # Print a one-line, markdown-formatted summary of common fit indices
  # for a fitted lavaan model. Indices absent from the object (e.g. the
  # level-specific SRMRs of single-level models) come out as NA because
  # single-bracket indexing by a missing name yields NA.
  fm <- fitmeasures(x)
  r3 <- function(nm) round(fm[nm], 3)  # rounded lookup, NA when absent
  cat(sprintf(
          "χ^2^ = %s, _df_ = %s, CFI = %s, TLI = %s, RMSEA = %s, SRMR = %s, SRMR~between~ = %s, SRMR~within~ = %s",
          r3("chisq"),
          fm["df"],
          r3("cfi"),
          r3("tli"),
          r3("rmsea"),
          r3("srmr"),
          r3("srmr_between"),
          r3("srmr_within")))
}

# print the fit for cfa_meti_1
fpf(cfa1d_meti_1)

χ2 = 463.608, df = 77, CFI = 0.881, TLI = 0.86, RMSEA = 0.142, SRMR = 0.057, SRMRbetween = NA, SRMRwithin = NA

fpf(cfa_meti_1)

χ2 = 171.668, df = 74, CFI = 0.97, TLI = 0.963, RMSEA = 0.073, SRMR = 0.029, SRMRbetween = NA, SRMRwithin = NA

cfa_meti_2 <- cfa(cfa_meti_model_2, data = data_rbt%>%filter(meas_rep == 2))

fpf(cfa1d_meti_2)

χ2 = 479.509, df = 77, CFI = 0.896, TLI = 0.878, RMSEA = 0.145, SRMR = 0.053, SRMRbetween = NA, SRMRwithin = NA

fpf(cfa_meti_2)

χ2 = 166.6, df = 74, CFI = 0.976, TLI = 0.971, RMSEA = 0.071, SRMR = 0.029, SRMRbetween = NA, SRMRwithin = NA

anova(cfa1d_meti_1, cfa_meti_1)
## Chi-Squared Difference Test
## 
##              Df    AIC    BIC  Chisq Chisq diff Df diff Pr(>Chisq)    
## cfa_meti_1   74 8470.3 8579.4 171.67                                  
## cfa1d_meti_1 77 8756.2 8854.8 463.61     291.94       3  < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
anova(cfa1d_meti_2, cfa_meti_2)
## Chi-Squared Difference Test
## 
##              Df    AIC    BIC  Chisq Chisq diff Df diff Pr(>Chisq)    
## cfa_meti_2   74 7829.6 7938.8 166.60                                  
## cfa1d_meti_2 77 8136.5 8235.1 479.51     312.91       3  < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

As a next step we ran a two-level CFA …

# onedimensional model
# (two-level CFA: the same single-factor structure is specified at the
# within- and the between-cluster level; sessions are the clusters)
mcfa1d_meti_model <- "level: 1
                    meti =~ exp_1 + exp_2 + exp_3 + exp_4 + exp_5 + exp_6 +
                            int_1 + int_2 + int_3 + int_4 +
                            ben_1 + ben_2 + ben_3 + ben_4
                    
                    level: 2
                    meti =~ exp_1 + exp_2 + exp_3 + exp_4 + exp_5 + exp_6 +
                            int_1 + int_2 + int_3 + int_4 +
                            ben_1 + ben_2 + ben_3 + ben_4"



# three-dimensional two-level model. The trailing ~~ lines add four
# residual covariances and fix four residual variances to zero.
# NOTE(review): the zero constraints presumably address inadmissible
# (negative) between-level residual variance estimates — confirm
mcfa_meti_model <- "level: 1
                    exp_w =~ exp_1 + exp_2 + exp_3 + exp_4 + exp_5 + exp_6
                    int_w =~ int_1 + int_2 + int_3 + int_4 
                    ben_w =~ ben_1 + ben_2 + ben_3 + ben_4
                    
                    level: 2
                    exp_b =~ exp_1 + exp_2 + exp_3 + exp_4 + exp_5 + exp_6
                    int_b =~ int_1 + int_2 + int_3 + int_4 
                    ben_b =~ ben_1 + ben_2 + ben_3 + ben_4
int_2 ~~    int_3
exp_5 ~~    exp_6
int_4 ~~    ben_2
int_3 ~~    ben_1
exp_3 ~~ 0*exp_3
int_3 ~~ 0*int_3
ben_2 ~~ 0*ben_2
ben_3 ~~ 0*ben_3"


# fit both two-level models with session as the cluster variable,
# then print the fit of the 1d model
mcfa1d_meti <- cfa(mcfa1d_meti_model, data = data_rbt, cluster = "session")
mcfa_meti <- cfa(mcfa_meti_model, data = data_rbt, cluster = "session")
fpf(mcfa1d_meti)

χ2 = 473.759, df = 154, CFI = 0.954, TLI = 0.945, RMSEA = 0.064, SRMR = 0.304, SRMRbetween = 0.232, SRMRwithin = 0.072

fpf(mcfa_meti)

χ2 = 238.803, df = 148, CFI = 0.987, TLI = 0.984, RMSEA = 0.035, SRMR = 0.125, SRMRbetween = 0.085, SRMRwithin = 0.04

anova(mcfa1d_meti, mcfa_meti)
## Chi-Squared Difference Test
## 
##              Df   AIC   BIC  Chisq Chisq diff Df diff Pr(>Chisq)    
## mcfa_meti   148 16218 16538 238.80                                  
## mcfa1d_meti 154 16441 16736 473.76     234.96       6  < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

3.2.2 Reliability (McDonalds \(\omega\))

# McDonald's omega (point estimate of MBESS::ci.reliability) per METI
# subscale, separately for the two measurement occasions
# First Measurement
## Expertise
MBESS::ci.reliability(data_rbt%>%filter(meas_rep == 1) %>% select(starts_with("exp")))$est
## [1] 0.9420418
# Integrity
MBESS::ci.reliability(data_rbt%>%filter(meas_rep == 1) %>% select(starts_with("int")))$est
## [1] 0.9084106
# Benevolence
MBESS::ci.reliability(data_rbt%>%filter(meas_rep == 1) %>% select(starts_with("ben")))$est
## [1] 0.9006582
# Second Measurement
## Expertise
MBESS::ci.reliability(data_rbt%>%filter(meas_rep == 2) %>% select(starts_with("exp")))$est
## [1] 0.9623451
# Integrity
MBESS::ci.reliability(data_rbt%>%filter(meas_rep == 2) %>% select(starts_with("int")))$est
## [1] 0.9179565
# Benevolence
MBESS::ci.reliability(data_rbt%>%filter(meas_rep == 2) %>% select(starts_with("ben")))$est
## [1] 0.9110896

3.3 Topic specific multiplism

3.3.1 Dimensionality (CFA)

First we specified two consecutive onedimensional CFA models

# one-factor TSM model; the shared label "a" constrains all four loadings
# to equality, plus one residual covariance
cfa_tsm_model <- "tsm =~ a*tsm_1 + a*tsm_2 + a*tsm_3 + a*tsm_4
                  tsm_2 ~~ tsm_4"

# fit at measurement 1 with full-information ML for missing data
# NOTE(review): missing = "fiml" is used here but NOT for cfa_tsm_2 below —
# confirm this asymmetry is intended
cfa_tsm_1 <-
  cfa(cfa_tsm_model, 
      data = data_rbt %>% 
        filter(meas_rep == 1),
      missing = "fiml")
fpf(cfa_tsm_1)

χ2 = 5.245, df = 4, CFI = 0.993, TLI = 0.99, RMSEA = 0.035, SRMR = 0.034, SRMRbetween = NA, SRMRwithin = NA

# same model at measurement 2
# NOTE(review): unlike cfa_tsm_1, fitted without missing = "fiml" —
# confirm this asymmetry is intended
cfa_tsm_2 <-
  cfa(cfa_tsm_model, 
      data = data_rbt %>% 
        filter(meas_rep == 2))
fpf(cfa_tsm_2)

χ2 = 7.677, df = 4, CFI = 0.974, TLI = 0.962, RMSEA = 0.061, SRMR = 0.058, SRMRbetween = NA, SRMRwithin = NA

As a next step, we ran a two-level CFA …

# two-level one-factor TSM model (within/between sessions), with the
# tsm_2 ~~ tsm_4 residual covariance allowed at both levels
mcfa_tsm_model <- "level: 1
                    tsm_w =~ tsm_1 + tsm_2 + tsm_3 + tsm_4
                    tsm_2 ~~ tsm_4

                    level: 2
                    tsm_b =~ tsm_1 + tsm_2 + tsm_3 + tsm_4
                    tsm_2 ~~ tsm_4
                    "

mcfa_tsm <- cfa(mcfa_tsm_model, data = data_rbt, cluster = "session")
fpf(mcfa_tsm)

χ2 = 1.023, df = 2, CFI = 1, TLI = 1.02, RMSEA = 0, SRMR = 0.027, SRMRbetween = 0.019, SRMRwithin = 0.008

3.3.2 Reliability (McDonalds \(\omega\))

# First Measurement
MBESS::ci.reliability(data_rbt %>% filter(meas_rep ==1) %>% select(starts_with("tsm")))$est
## [1] 0.6911811
# Second Measurement
MBESS::ci.reliability(data_rbt %>% filter(meas_rep ==2) %>% select(starts_with("tsm")))$est
## [1] 0.6447776

3.4 Treatment check

3.4.1 Dimensionality (CFA)

We specified two consecutive onedimensional CFA models

# one-factor treatment-check models; model2 adds two further residual
# covariances for the second measurement occasion
cfa_tch_model <- "tch =~ tch_1 + tch_2 + tch_3 + tch_4 + tch_5
                  tch_1 ~~ tch_4"

cfa_tch_model2 <- "tch =~ tch_1 + tch_2 + tch_3 + tch_4 + tch_5
                  tch_1 ~~ tch_4
                  tch_3 ~~ tch_5
                  tch_1 ~~ tch_2"

# fitted on data_tch_n, i.e. with -999 recoded to NA and all-missing rows
# removed
cfa_tch_1 <- cfa(cfa_tch_model, 
                 data = data_tch_n%>%
                   filter(meas_rep == 1))
fpf(cfa_tch_1)

χ2 = 9.287, df = 4, CFI = 0.996, TLI = 0.989, RMSEA = 0.089, SRMR = 0.008, SRMRbetween = NA, SRMRwithin = NA

# extended model at measurement 2
cfa_tch_2 <- cfa(cfa_tch_model2, data = data_tch_n%>%filter(meas_rep == 2))
fpf(cfa_tch_2)

χ2 = 0.269, df = 2, CFI = 1, TLI = 1.006, RMSEA = 0, SRMR = 0.001, SRMRbetween = NA, SRMRwithin = NA

modificationindices(cfa_tch_2, sort. = T)
 lhs op   rhs    mi    epc sepc.lv sepc.all sepc.nox

20 tch_3 ~~ tch_4 0.262  0.009  0.009  0.045  0.045
21 tch_4 ~~ tch_5 0.262 -0.009 -0.009 -0.050 -0.050
19 tch_2 ~~ tch_5 0.075  0.005  0.005  0.043  0.043
17 tch_2 ~~ tch_3 0.075 -0.005 -0.005 -0.038 -0.038
15 tch_1 ~~ tch_3 0.072 -0.005 -0.005 -0.020 -0.020
16 tch_1 ~~ tch_5 0.072  0.005  0.005  0.022  0.022

3.4.2 Reliability (Ordinal McDonalds \(\omega\))

# First Measurement
MBESS::ci.reliability(data_rbt %>% filter(meas_rep == 1) %>% select(starts_with("tch")))$est
## [1] 0.8746516
# Second Measurement
MBESS::ci.reliability(data_rbt %>% filter(meas_rep == 2) %>% select(starts_with("tch")))$est
## [1] 0.9149272

3.5 Table of (M)CFA fit-indices

# Collect the same set of fit indices from every (M)CFA model into one
# comparison table. The index vector and its extraction are factored out
# so the list of measures is defined exactly once instead of eleven times.
fit_indices <- c("chisq", "df", "cfi", "tli", "rmsea",
                 "srmr", "srmr_between", "srmr_within", "bic", "aic")
# indices a model does not provide come out as NA (single-bracket lookup)
fit_row <- function(fit) fitmeasures(fit)[fit_indices]

tibble(`1d CFA METI 1` = fit_row(cfa1d_meti_1),
       `1d CFA METI 2` = fit_row(cfa1d_meti_2),
       `3d CFA METI 1` = fit_row(cfa_meti_1),
       `3d CFA METI 2` = fit_row(cfa_meti_2),
       `1d MCFA METI` = fit_row(mcfa1d_meti),
       `3d MCFA METI` = fit_row(mcfa_meti),
       `1d CFA TSM 1` = fit_row(cfa_tsm_1),
       `1d CFA TSM 2` = fit_row(cfa_tsm_2),
       `1d MCFA TSM` = fit_row(mcfa_tsm),
       `1d CFA TCH 1` = fit_row(cfa_tch_1),
       `1d CFA TCH 2` = fit_row(cfa_tch_2),
       rownames = c("χ^2^", "_df_", "CFI", "TLI", "RMSEA", "SRMR",
                    "SRMR~between~", "SRMR~within~", "BIC", "AIC")) %>%
  column_to_rownames(var = "rownames") %>%
  knitr::kable(., digits = 3)
1d CFA METI 1 1d CFA METI 2 3d CFA METI 1 3d CFA METI 2 1d MCFA METI 3d MCFA METI 1d CFA TSM 1 1d CFA TSM 2 1d MCFA TSM 1d CFA TCH 1 1d CFA TCH 2
χ2 463.608 479.509 171.668 166.600 473.759 238.803 5.245 7.677 1.023 9.287 0.269
df 77.000 77.000 74.000 74.000 154.000 148.000 4.000 4.000 2.000 4.000 2.000
CFI 0.881 0.896 0.970 0.976 0.954 0.987 0.993 0.974 1.000 0.996 1.000
TLI 0.860 0.878 0.963 0.971 0.945 0.984 0.990 0.962 1.020 0.989 1.006
RMSEA 0.142 0.145 0.073 0.071 0.064 0.035 0.035 0.061 0.000 0.089 0.000
SRMR 0.057 0.053 0.029 0.029 0.304 0.125 0.034 0.058 0.027 0.008 0.001
SRMRbetween NA NA NA NA 0.232 0.085 NA NA 0.019 NA NA
SRMRwithin NA NA NA NA 0.072 0.040 NA NA 0.008 NA NA
BIC 8854.815 8235.105 8579.439 7938.761 16735.574 16537.906 2369.599 2237.418 4559.431 1667.935 1685.730
AIC 8756.214 8136.504 8470.274 7829.596 16440.552 16217.596 2334.384 2216.290 4466.709 1633.572 1644.514

4 Results of the treatmentcheck

4.1 Plot

# Frequencies of every treatment-check response per condition and item,
# with plot-ready labels. pivot_longer() + count() replace the superseded
# gather() + group_by()/summarize() combination (and avoid the regrouping
# message the old code emitted).
res_tch_data <- data_tch %>%
  pivot_longer(starts_with("tch_"),
               names_to = "variable", values_to = "value") %>%
  mutate(value = as.character(value)) %>%
  count(treat, variable, value, name = "freq") %>%
  mutate(treat = case_when(treat == "GB" ~ "Greyed out badges",
                           treat == "CB" ~ "Colored badges",
                           treat == "CC" ~ "Control condition",
                           TRUE ~ treat),
         # -999 codes a "don't know" response
         value = case_when(value == "-999" ~ "don't know",
                           TRUE ~ value),
         variable = case_when(variable == "tch_1" ~ "item 1",
                              variable == "tch_2" ~ "item 2",
                              variable == "tch_3" ~ "item 3",
                              variable == "tch_4" ~ "item 4",
                              variable == "tch_5" ~ "item 5"),
         Frequency = freq)
## `summarise()` regrouping output by 'treat', 'variable' (override with `.groups` argument)
# Heat-style dot plot: response frequency per item and value, faceted by
# experimental condition (squares sized and shaded by frequency)
res_tch_plot <- ggplot(res_tch_data, aes(variable, value)) + 
  geom_point(aes(size = Frequency, color = Frequency), shape = 15) +
  scale_size_continuous(range = c(3,15)) + 
  scale_color_gradient(low = "grey95", high = "grey65") +
  guides(color=guide_legend(), size = guide_legend()) +
  facet_wrap(~treat) + 
  theme_ipsum_ps() + 
  ggtitle("Results of the treatment check", "Frequency per item and experimental condition") + 
  ylab("") + 
  xlab("")
  
res_tch_plot

# Publication variant of the treatment-check plot: no title and the
# shorter facet label "Grey badges"
res_tch_plot_pub <- ggplot(res_tch_data %>%
                         mutate(treat = case_when(treat == "Greyed out badges" ~ "Grey badges",
                                                  TRUE ~ treat)), aes(variable, value)) + 
  geom_point(aes(size = Frequency, color = Frequency), shape = 15) +
  scale_size_continuous(range = c(3,15)) + 
  scale_color_gradient(low = "grey95", high = "grey65") +
  guides(color=guide_legend(), size = guide_legend()) +
  facet_wrap(~treat) + 
  theme_ipsum_ps() + 
  ylab("") + 
  xlab("")


#ggsave("8_reproducible_documentation_of_analyses/Fig/Fig6.jpg", res_tch_plot_pub, width = 130, height = 50, units = "mm", dpi = 300, scale = 2.4)

4.2 Effect size

# Vargha-Delaney A for the first treatment-check item, comparing the two
# badge conditions only (control condition excluded; rows without a valid
# tch_1 response drop out of the comparison)
res_tch_data_A <- data_tch_n %>%
  filter(treat != "CC", tch_1 != -999)

effsize::VD.A(tch_1 ~ treat, data = res_tch_data_A)   
## 
## Vargha and Delaney A
## 
## A estimate: 0.9378639 (large)

5 Graphical exploration

5.1 Plot Hyp 1

# Violin plots of the three trust dimensions per condition, with point
# ranges for mean ± 1 SD
data_scales_lables%>%
  gather(Variable, Value, Experitse, Integrity, Benevolence)%>%
  ggplot(., aes(x = Treatment, y = Value)) + 
  geom_violin(adjust = 1.5) +
  stat_summary(fun.data = "mean_sdl", fun.args = list(mult = 1)) +
  coord_flip() +
  facet_wrap(~ Variable, nrow = 1) + 
  labs(title = "Graphical overview (Hyp 1)",
       subtitle = "Violinplots and means ± 1*SD",
       caption = "") +
  ylim(1,7) +
  hrbrthemes::theme_ipsum_ps()

# Export data for combined plot
#data_scales_lables %>% 
#  mutate(Study = "Study 2") %>% 
#  write_csv(., "8_reproducible_documentation_of_analyses/Fig/data_scales_lables_study_2.csv")

5.2 Descriptive Effect Sizes Hyp 1

# Descriptive Vargha-Delaney A for Integrity between two conditions.
# Dropping one of the three conditions leaves exactly the pairwise
# comparison of interest; the helper removes the triplicated pipeline.
vda_integrity <- function(exclude) {
  data_scales_lables %>%
    filter(treat != exclude) %>%
    mutate(treat = as.character(treat)) %>%
    effsize::VD.A(Integrity ~ treat, data = .) %>%
    .$estimate
}

A_GB_CC <- vda_integrity("CB")  # GB vs CC
A_CC_CB <- vda_integrity("GB")  # CC vs CB
A_GB_CB <- vda_integrity("CC")  # GB vs CB
GB CC
CC A = 0.65, d = 0.55
CB A = 0.71, d = 0.77 A = 0.57, d = 0.25

5.3 Hyp 2/3

# Jittered scatter of Integrity against topic-specific multiplism per
# condition; smoothers are added when the object is printed below
plot_hyp2_1 <- ggplot(data_scales_lables, 
                      aes(x=`Topic specific multiplism`, y = Integrity)) + 
  geom_jitter() +
  facet_wrap(~ Treatment, nrow = 1) + 
  labs(title = "Graphical overview (Hyp 2/3)",
       subtitle = "Jitter plot per treatment") +
  hrbrthemes::theme_ipsum_ps()


plot_hyp2_1 + stat_smooth()
## `geom_smooth()` using method = 'loess' and formula 'y ~ x'

plot_hyp2_1 + stat_smooth(method = "lm")
## `geom_smooth()` using formula 'y ~ x'

5.4 Descriptive Effect Sizes Hyp 3/4

Pearson and Kendall correlations:

# Pearson (r_*) and Kendall (t_*) correlations between Integrity and
# topic-specific multiplism within each condition, rounded to 2 decimals.
# A helper replaces the six near-identical pipelines.
cor_int_mult <- function(cond, method) {
  d <- data_scales_lables %>% filter(treat == cond)
  round(cor(pull(d, Integrity),
            pull(d, `Topic specific multiplism`),
            method = method, use = "pairwise.complete.obs"), 2)
}

r_GB <- cor_int_mult("GB", "pearson")
t_GB <- cor_int_mult("GB", "kendall")

r_CC <- cor_int_mult("CC", "pearson")
t_CC <- cor_int_mult("CC", "kendall")

r_CB <- cor_int_mult("CB", "pearson")
t_CB <- cor_int_mult("CB", "kendall")
GB CC CB
\(r(integrity, multiplism)\) -0.34 -0.32 -0.35
\(\tau(integrity, multiplism)\) -0.23 -0.23 -0.24

5.5 Hyp 4

# Violin plot of topic-specific multiplism per condition.
# BUG FIX: the case_when fallback previously read `T ~ "treat"`, mapping
# any unmatched treatment to the literal string "treat" instead of keeping
# its value (cf. the correct `T ~ treat` used for res_tch_data). The branch
# is unreachable with the three known codes, but is corrected for safety.
data_scales_lables %>%
  mutate(Treatment = case_when(treat == "GB" ~ "Greyed out badges",
                               treat == "CB" ~ "Colored badges",
                               treat == "CC" ~ "Control condition",
                               TRUE ~ treat)) %>%
  ggplot(., aes(x = Treatment, y = `Topic specific multiplism`)) + 
  geom_violin(adjust = 1.5, alpha = .5) +
  stat_summary(fun.data = "mean_sdl", fun.args = list(mult = 1)) +
  coord_flip() +
  labs(title = "Graphical overview (Hyp 4)",
       subtitle = "Violinplots and means ± 1*SD",
       caption = "") +
  xlab("") +
  ylim(1,4) +
  hrbrthemes::theme_ipsum_ps()

# Publication version of the Hyp-4 plot (no title).
# BUG FIX: same as above — the fallback `T ~ "treat"` produced the literal
# string "treat"; corrected to keep the original value.
fig4 <- data_scales_lables %>%
  mutate(Treatment = case_when(treat == "GB" ~ "Greyed out badges",
                               treat == "CB" ~ "Colored badges",
                               treat == "CC" ~ "Control condition",
                               TRUE ~ treat)) %>%
  ggplot(., aes(x = Treatment, y = `Topic specific multiplism`)) + 
  geom_violin(adjust = 1.5, alpha = .5) +
  stat_summary(fun.data = "mean_sdl", fun.args = list(mult = 1)) +
  coord_flip() +
  xlab("") +
  ylim(1,4) +
  hrbrthemes::theme_ipsum_ps()

5.6 Descriptive Effect Sizes Hyp 4

# Vargha-Delaney A for topic-specific multiplism between each pair of
# conditions, plus Cohen's d derived via the normal-quantile
# transformation d = qnorm(A) * sqrt(2). The helper removes the
# triplicated pipeline.
vda_mult <- function(exclude) {
  effsize::VD.A(`Topic specific multiplism` ~ treat,
                data = data_scales_lables %>%
                  filter(treat != exclude) %>%
                  mutate(treat = as.character(treat)))$estimate
}

A_mult_GB_CC <- vda_mult("CB")  # GB vs CC
d_mult_GB_CC <- qnorm(A_mult_GB_CC) * sqrt(2)

A_mult_CC_CB <- vda_mult("GB")  # CC vs CB
d_mult_CC_CB <- qnorm(A_mult_CC_CB) * sqrt(2)

A_mult_GB_CB <- vda_mult("CC")  # GB vs CB
d_mult_GB_CB <- qnorm(A_mult_GB_CB) * sqrt(2)
GB CC
CC A = 0.43, d = -0.27
CB A = 0.43, d = -0.24 A = 0.51, d = 0.02

6 Inference statistics (Hyp 1)

6.1 Description of the variables

All of the following analyses are based on the data frame object data_scales_wide which is why we describe it here somewhat more detailed.
All analyses are based on the measured variables Integrity (Integrity: is the information source sincere, honest, just, unselfish and fair?) and Topic Specific Multiplism (TSM: is knowledge and knowing about a topic arbitrary?). As this data set is in wide format, the experimental conditions are encoded within the variable names:

  • GB means that the participants of the study could learn from the greyed out badges (and corresponding explanations) within the abstract that the authors of the study declined to use Open Practices
  • CC means that the participants of the study could not learn whether or not the authors of the study used Open Practices
  • CB means that the participants of the study could learn from the colored badges (and corresponding explanations) within the abstract that the authors of the study used Open Practices

Finally, the variable session identifies the study participants.

If we look descriptively at these variables:

data_scales_wide%>%
  my_skim(.)
Data summary
Name Piped data
Number of rows 250
Number of columns 13
_______________________
Column type frequency:
character 1
numeric 12
________________________
Group variables None

Variable type: character

skim_variable n_missing complete_rate min max empty n_unique whitespace
session 0 1 64 64 0 250 0

Variable type: numeric

skim_variable n_missing complete_rate mean sd p0 p25 p50 p75 p100 hist skewn kurto maxabszvalue
Benevolence_CB 78 0.69 5.51 1.05 2.75 4.75 5.50 6.25 7 ▁▃▇▇▇ -0.34 -0.63 2.63
Benevolence_CC 85 0.66 5.27 1.02 2.50 4.50 5.25 6.00 7 ▁▅▆▇▅ -0.10 -0.68 2.73
Benevolence_GB 87 0.65 4.64 1.15 1.75 3.75 4.75 5.50 7 ▂▆▇▇▅ -0.06 -0.43 2.52
Experitse_CB 78 0.69 5.72 1.05 2.33 5.12 5.83 6.67 7 ▁▂▃▇▇ -0.65 -0.22 3.23
Experitse_CC 85 0.66 5.71 1.04 2.50 5.17 5.83 6.50 7 ▁▂▃▇▇ -0.80 0.40 3.09
Experitse_GB 87 0.65 4.91 1.18 1.67 4.00 5.00 5.83 7 ▁▃▇▇▆ -0.11 -0.49 2.76
Integrity_CB 78 0.69 5.57 1.01 3.00 4.75 5.75 6.50 7 ▁▅▅▇▇ -0.27 -0.93 2.55
Integrity_CC 85 0.66 5.32 0.99 2.75 4.50 5.25 6.00 7 ▁▅▇▇▆ -0.12 -0.84 2.61
Integrity_GB 87 0.65 4.71 1.15 2.00 4.00 4.75 5.50 7 ▂▆▇▆▃ -0.03 -0.54 2.36
TSM_CB 78 0.69 2.31 0.59 1.00 2.00 2.25 2.75 4 ▂▅▇▂▁ -0.01 -0.42 2.86
TSM_CC 85 0.66 2.32 0.59 1.00 2.00 2.25 2.75 4 ▂▆▇▂▁ 0.37 -0.17 2.87
TSM_GB 87 0.65 2.46 0.54 1.25 2.00 2.50 2.75 4 ▃▇▇▅▁ 0.26 -0.30 2.83

6.2 Investigating the missingness

6.2.1 Missingness per Variable

library(naniar)
## 
## Attaching package: 'naniar'
## The following object is masked from 'package:skimr':
## 
##     n_complete
# share of missing values per scale x condition column (missingness is
# structural here: each participant saw only some conditions)
visdat::vis_miss(data_scales_wide%>%select(-session)) + 
  ggtitle("Missingness per Variable") + 
  theme(plot.margin = margin(1, 2, 1, 1, "cm"))

6.2.2 Marginal distributions Integrity_GB

library(patchwork)
ggplot(data_scales_wide, aes(Integrity_GB, Integrity_GB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(Integrity_CC, Integrity_GB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(Integrity_CB, Integrity_GB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_GB,       Integrity_GB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_CC,       Integrity_GB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_CB,       Integrity_GB)) + 
  geom_miss_point(alpha = .2) + plot_layout(guides = 'collect') +
  theme_ipsum_ps()

6.2.3 Marginal distributions Integrity_CC

# Same six-panel missingness figure as above, with Integrity_CC on the y-axis.
ggplot(data_scales_wide, aes(Integrity_GB, Integrity_CC)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(Integrity_CC, Integrity_CC)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(Integrity_CB, Integrity_CC)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_GB,       Integrity_CC)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_CC,       Integrity_CC)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_CB,       Integrity_CC)) + 
  # collect the shared missing/not-missing legend once for the whole figure
  geom_miss_point(alpha = .2) + plot_layout(guides = 'collect') +
  theme_ipsum_ps()

6.2.4 Marginal distributions Integrity_CB

# Same six-panel missingness figure, with Integrity_CB on the y-axis.
ggplot(data_scales_wide, aes(Integrity_GB, Integrity_CB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(Integrity_CC, Integrity_CB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(Integrity_CB, Integrity_CB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_GB,       Integrity_CB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_CC,       Integrity_CB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_CB,       Integrity_CB)) + 
  # collect the shared missing/not-missing legend once for the whole figure
  geom_miss_point(alpha = .2) + plot_layout(guides = 'collect') +
  theme_ipsum_ps()

6.2.5 Marginal distributions TSM_GB

# Same six-panel missingness figure, with TSM_GB on the y-axis.
ggplot(data_scales_wide, aes(Integrity_GB, TSM_GB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(Integrity_CC, TSM_GB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(Integrity_CB, TSM_GB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_GB,       TSM_GB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_CC,       TSM_GB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_CB,       TSM_GB)) + 
  # collect the shared missing/not-missing legend once for the whole figure
  geom_miss_point(alpha = .2) + plot_layout(guides = 'collect') +
  theme_ipsum_ps()

6.2.6 Marginal distributions TSM_CC

# Same six-panel missingness figure, with TSM_CC on the y-axis.
ggplot(data_scales_wide, aes(Integrity_GB, TSM_CC)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(Integrity_CC, TSM_CC)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(Integrity_CB, TSM_CC)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_GB,       TSM_CC)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_CC,       TSM_CC)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_CB,       TSM_CC)) + 
  # collect the shared missing/not-missing legend once for the whole figure
  geom_miss_point(alpha = .2) + plot_layout(guides = 'collect') +
  theme_ipsum_ps()

6.2.7 Marginal distributions TSM_CB

# Same six-panel missingness figure, with TSM_CB on the y-axis.
ggplot(data_scales_wide, aes(Integrity_GB, TSM_CB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(Integrity_CC, TSM_CB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(Integrity_CB, TSM_CB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_GB,       TSM_CB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_CC,       TSM_CB)) + 
  geom_miss_point(alpha = .2) +
  theme_ipsum_ps() +
ggplot(data_scales_wide, aes(TSM_CB,       TSM_CB)) + 
  # collect the shared missing/not-missing legend once for the whole figure
  geom_miss_point(alpha = .2) + plot_layout(guides = 'collect') +
  theme_ipsum_ps()

6.2.8 Cohen’s d of missing/not-missing per variable

# Cohen's d of the mean difference between missing and non-missing cases,
# for each pair of scale variables (miceadds::mi_dstat); rounded for display.
d_matrix_missings <- data_scales_wide %>%
  select(starts_with("Int"), starts_with("TSM")) %>%
  miceadds::mi_dstat() %>%
  round(4)

# This renders a table; the original caption said "Boxplot of ..." but the
# boxplot only follows further below.
knitr::kable(d_matrix_missings,
             caption = "Cohen's d of missing/not-missing per variable")
Cohen’s d of missing/not-missing per variable
Integrity_CB Integrity_CC Integrity_GB TSM_CB TSM_CC TSM_GB
Integrity_CB NaN -0.0349 -0.0827 NaN -0.2 -0.0511
Integrity_CC 0.0504 NaN 0.0827 -0.0026 NaN 0.0511
Integrity_GB -0.0504 0.0349 NaN 0.0026 0.2 NaN
TSM_CB NaN -0.0349 -0.0827 NaN -0.2 -0.0511
TSM_CC 0.0504 NaN 0.0827 -0.0026 NaN 0.0511
TSM_GB -0.0504 0.0349 NaN 0.0026 0.2 NaN
# One boxplot over all pairwise d values; the NaN entries on the diagonal
# are ignored by boxplot. `main =` sets the title in the same call.
boxplot(as.vector(d_matrix_missings),
        main = "Boxplot of Cohen's d of missing/not-missing per variable")

6.3 Imputation

# Multiple imputation with mice: Bayesian linear regression ("norm") for
# every variable; `session` is an identifier and is excluded from imputation.
M <- 1000  # number of imputed data sets

out <- mice(data = data_scales_wide %>% select(-session),
            m = M,
            meth = "norm",       # a single string is applied to every column
            diagnostics = FALSE,
            printFlag = FALSE,   # was `F`; TRUE/FALSE are not reassignable
            seed = 83851)        # fixed seed for reproducibility

6.4 Check of first 10 imputation

# Re-run the imputation with only 10 data sets for diagnostic plots
# (same method and seed as the main run above).
out_first10 <- mice(data = data_scales_wide %>% select(-session),
                    m = 10,
                    meth = "norm",       # single string, applied to every column
                    diagnostics = FALSE,
                    printFlag = FALSE,   # was `F`; TRUE/FALSE are not reassignable
                    seed = 83851)

# observed vs. imputed densities per variable
densityplot(out_first10)

# convergence check: chain means and variances across iterations
plot(out_first10)

6.5 Parameter and BF estimation

# Set up the matrices for the estimates ##############
# one row of the three Integrity means per imputed data set
mulest_hyp1 <- matrix(0, nrow = M, ncol = 3)
# running average of the within-imputation covariance matrices
covwithin_hyp1 <- matrix(0, nrow = 3, ncol = 3)


# Estimate the coefficients for each data frame ######
# seq_len(M) instead of 1:M (safe if M were ever 0)
for (i in seq_len(M)) {
  # estimate the means of the three variables on imputed data set i
  within_hyp1 <- lm(cbind(Integrity_GB, Integrity_CC, Integrity_CB) ~ 1,
                    data = mice::complete(out, i))
  mulest_hyp1[i, ] <- coef(within_hyp1)[1:3]  # store these means
  covwithin_hyp1 <- covwithin_hyp1 +
    1/M * vcov(within_hyp1)[1:3, 1:3]         # average within covariance
}


# Compute the average of the estimates ###############
# pool the per-imputation means and covariances (Rubin-style combination)
estimates_hyp1 <- colMeans(mulest_hyp1)
names(estimates_hyp1) <- c("Integrity_GB", "Integrity_CC", "Integrity_CB")
covbetween_hyp1 <- cov(mulest_hyp1)  # between-imputation covariance matrix
covariance_hyp1 <- covwithin_hyp1 + (1 + 1/M) * covbetween_hyp1  # total variance


# Determine the effective and real sample sizes ######
samp_hyp1 <- nrow(data_scales_wide)  # real sample size
nucom_hyp1 <- samp_hyp1 - length(estimates_hyp1)

# corresponds to the numbered equations in Hoijtink, Gu, Mulder, & Rosseel (2019):
lam_hyp1 <- (1 + 1/M) * (1/length(estimates_hyp1)) *
  sum(diag(covbetween_hyp1 %*%
             MASS::ginv(covariance_hyp1)))  # (43) avg. fraction of missing information
nuold_hyp1 <- (M - 1)/(lam_hyp1^2)  # (44)
nuobs_hyp1 <- (nucom_hyp1 + 1)/(nucom_hyp1 + 3) * nucom_hyp1 * (1 - lam_hyp1)  # (46)
nu_hyp1 <- nuold_hyp1 * nuobs_hyp1/(nuold_hyp1 + nuobs_hyp1)  # (47)
fracmis_hyp1 <- (nu_hyp1 + 1)/(nu_hyp1 + 3) * lam_hyp1 + 2/(nu_hyp1 + 3)  # (48)
# effective sample size after discounting for missingness
# NOTE(review): the original comment read "= 172 approx. 2/3 * 270", but the
# data have 250 rows (cf. skim output above) — confirm the intended N
neff_hyp1 <- samp_hyp1 - samp_hyp1 * fracmis_hyp1

# coerce `covariance` to a list, as bain expects a list of covariance matrices
covariance_hyp1 <- list(covariance_hyp1)


# Test the hypotheses with bain ######################
# H1: ordered means GB < CC < CB; H2: all means equal; H3: GB lower, CC = CB.
# `n` is the pooled effective sample size computed above.
set.seed(6346)
results_hyp1 <- bain(estimates_hyp1,
               "Integrity_GB<Integrity_CC<Integrity_CB;
                Integrity_GB=Integrity_CC=Integrity_CB;
                Integrity_GB<Integrity_CC=Integrity_CB",
                n = neff_hyp1, Sigma=covariance_hyp1,    
                group_parameters=3, joint_parameters = 0)
print(results_hyp1)
## Bayesian informative hypothesis testing for an object of class numeric:
## 
##    Fit   Com   BF.u  BF.c    PMPa  PMPb 
## H1 0.994 0.183 5.438 803.908 0.882 0.759
## H2 0.000 0.203 0.000 0.000   0.000 0.000
## H3 0.166 0.229 0.725 0.725   0.118 0.101
## Hu                                 0.140
## 
## Hypotheses:
##   H1: Integrity_GB<Integrity_CC<Integrity_CB
##   H2: Integrity_GB=Integrity_CC=Integrity_CB
##   H3: Integrity_GB<Integrity_CC=Integrity_CB
## 
## Note: BF.u denotes the Bayes factor of the hypothesis at hand versus the unconstrained hypothesis Hu. BF.c denotes the Bayes factor of the hypothesis at hand versus its complement.
summary(results_hyp1)
##      Parameter        n Estimate       lb       ub
## 1 Integrity_GB 167.6928 4.728577 4.556397 4.900757
## 2 Integrity_CC 167.6928 5.319534 5.174336 5.464732
## 3 Integrity_CB 167.6928 5.561620 5.413517 5.709722
results_hyp1$BFmatrix
##              H1           H2           H3
## H1 1.000000e+00 155466504142 7.497718e+00
## H2 6.432254e-12            1 4.822723e-11
## H3 1.333739e-01  20735175670 1.000000e+00

7 Inference statistics (Hyp 2)

7.1 Parameter estimation

# lavaan path model: regress Integrity on TSM separately for each treatment
path_mod <- "Integrity_GB ~ TSM_GB
             Integrity_CC ~ TSM_CC             
             Integrity_CB ~ TSM_CB"

# Set up the matrices for the estimates ##############
best_hyp2 <- matrix(0,nrow=M,ncol=3) # setup of matrices to store multiple estimates
covwithin_hyp2 <- matrix(0,nrow=3,ncol=3) # and covariance matrices


# Estimate the coefficients for each data frame ######
# seq_len(M) instead of 1:M (safe if M were ever 0)
for (i in seq_len(M)) {
  # fit the three regressions jointly on imputed data set i
  path_fit <- sem(path_mod,
                  data = mice::complete(out, i),
                  std.lv = TRUE)  # was `T`; TRUE/FALSE are not reassignable
  # store the three standardized path coefficients
  best_hyp2[i, ] <- parameterestimates(path_fit, standardized = TRUE) %>%
    filter(op == "~") %>%
    pull(std.all)
  # average the within-imputation covariance of the standardized paths
  covwithin_hyp2 <- covwithin_hyp2 +
    1/M * lavInspect(path_fit,
                     "vcov.std.all")[c("Integrity_GB~TSM_GB",
                                       "Integrity_CC~TSM_CC",
                                       "Integrity_CB~TSM_CB"),
                                     c("Integrity_GB~TSM_GB",
                                       "Integrity_CC~TSM_CC",
                                       "Integrity_CB~TSM_CB")]
}

# Compute the average of the estimates ###############
# pooled standardized path coefficients across the M imputations
estimates_hyp2 <- colMeans(best_hyp2)
names(estimates_hyp2) <- c("Int_on_TSM_GB", "Int_on_TSM_CC", "Int_on_TSM_CB")
round(estimates_hyp2, 2)
## Int_on_TSM_GB Int_on_TSM_CC Int_on_TSM_CB 
##         -0.35         -0.37         -0.41

7.2 Visual path model

\usetikzlibrary{arrows,positioning}
\usetikzlibrary{calc}
\begin{tikzpicture}[auto,>=latex,align=center,
latent/.style={circle,draw, thick,inner sep=0pt,minimum size=10mm},
manifest/.style={rectangle,draw, thick,inner sep=2pt,minimum size=15mm},
manifestcov/.style={rectangle,draw, thick,inner sep=1pt,minimum size=8mm},
mean/.style={regular polygon,regular polygon sides=3,draw,thick,inner sep=0pt,minimum size=10mm},
paths/.style={->, thick, >=latex, font = \footnotesize\sffamily, fill = white},
variance/.style={<->, thin, >=latex, bend left=270, looseness=2.5, font = \footnotesize},
covariance/.style={<->, thin, >=latex, bend left=290, looseness=.5, font = \footnotesize},
% NOTE(review): removed "maximum size = 1mm" — not a valid TikZ key (the
% style is unused in this picture, so the invalid key never triggered an error)
dotsdots/.style={circle, draw, fill = black, minimum size = 1mm}
]

%% dependent variables: integrity per treatment
\node [manifest] (AV1) at (0, -2)       {$int_{gb}$};
\node [manifest] (AV2) [right = of AV1] {$int_{cc}$};
\node [manifest] (AV3) [right = of AV2] {$int_{cb}$};


%% independent variables: TSM per treatment
\node [manifest] (UV1) at (0, 2)        {$tsm_{gb}$};
\node [manifest] (UV2) [right =of UV1]  {$tsm_{cc}$};
\node [manifest] (UV3) [right =of UV2]  {$tsm_{cb}$};

%% standardized path coefficients (values from estimates_hyp2 above)
\draw [paths] (UV1) to node [midway]{-.35} (AV1);
\draw [paths] (UV2) to node [midway]{-.37} (AV2);
\draw [paths] (UV3) to node [midway]{-.41} (AV3);

%\draw [covariance] (UV2) to node {} (UV1);
\end{tikzpicture}

# Pool the per-imputation path coefficients and covariances
covbetween_hyp2 <- cov(best_hyp2)  # between-imputation covariance matrix
covariance_hyp2 <- covwithin_hyp2 + (1 + 1/M) * covbetween_hyp2  # total variance

# Determine the effective and real sample sizes ######
samp_hyp2 <- nrow(data_scales_wide)  # real sample size
nucom_hyp2 <- samp_hyp2 - length(estimates_hyp2)

# corresponds to the numbered equations in Hoijtink, Gu, Mulder, & Rosseel (2019):
lam_hyp2 <- (1 + 1/M) * (1/length(estimates_hyp2)) *
  sum(diag(covbetween_hyp2 %*%
             MASS::ginv(covariance_hyp2)))  # (43) avg. fraction of missing information
nuold_hyp2 <- (M - 1)/(lam_hyp2^2)  # (44)
nuobs_hyp2 <- (nucom_hyp2 + 1)/(nucom_hyp2 + 3) * nucom_hyp2 * (1 - lam_hyp2)  # (46)
nu_hyp2 <- nuold_hyp2 * nuobs_hyp2/(nuold_hyp2 + nuobs_hyp2)  # (47)
fracmis_hyp2 <- (nu_hyp2 + 1)/(nu_hyp2 + 3) * lam_hyp2 + 2/(nu_hyp2 + 3)  # (48)
# effective sample size after discounting for missingness
# NOTE(review): the original comment read "= 114 approx. 2/3 * 270", but the
# data have 250 rows (cf. skim output above) — confirm the intended N
neff_hyp2 <- samp_hyp2 - samp_hyp2 * fracmis_hyp2

# coerce `covariance` to a list, as bain expects a list of covariance matrices
covariance_hyp2 <- list(covariance_hyp2)

7.3 Bayes Factor estimation (Hyp 2)

# Test H1 (all three standardized paths negative) against H2 (all zero).
set.seed(6346)
results_hyp2 <- bain(estimates_hyp2, 
                     "Int_on_TSM_GB < 0 & Int_on_TSM_CC < 0 & Int_on_TSM_CB < 0;
                     Int_on_TSM_GB = 0 & Int_on_TSM_CC = 0 & Int_on_TSM_CB = 0",
                    Sigma = covariance_hyp2,
                    n = neff_hyp2,
                    group_parameters = 3,
                    joint_parameters = 0,
                    standardize = FALSE)  # was `F`; TRUE/FALSE are not reassignable
print(results_hyp2)
## Bayesian informative hypothesis testing for an object of class numeric:
## 
##    Fit   Com   BF.u  BF.c         PMPa  PMPb 
## H1 1.000 0.145 6.890 26134059.789 1.000 0.873
## H2 0.000 0.741 0.000 0.000        0.000 0.000
## Hu                                      0.127
## 
## Hypotheses:
##   H1: Int_on_TSM_GB<0&Int_on_TSM_CC<0&Int_on_TSM_CB<0
##   H2: Int_on_TSM_GB=0&Int_on_TSM_CC=0&Int_on_TSM_CB=0
## 
## Note: BF.u denotes the Bayes factor of the hypothesis at hand versus the unconstrained hypothesis Hu. BF.c denotes the Bayes factor of the hypothesis at hand versus its complement.
results_hyp2$BFmatrix
##              H1           H2
## H1 1.000000e+00 2.558656e+16
## H2 3.908302e-17 1.000000e+00

8 Inference statistics (Hyp 3)

# Hyp 3: compare ordered (H1), partially ordered (H2), and equality (H3)
# hypotheses on the same pooled path coefficients as Hyp 2.
# NOTE(review): Hyp 2 used standardize = FALSE on the same estimates —
# confirm which setting is intended here.
set.seed(6346)
results_hyp3 <- bain(estimates_hyp2, 
                    "Int_on_TSM_GB < Int_on_TSM_CC < Int_on_TSM_CB;
                     (Int_on_TSM_GB, Int_on_TSM_CC) < Int_on_TSM_CB;
                     Int_on_TSM_GB = Int_on_TSM_CC = Int_on_TSM_CB",
                    Sigma = covariance_hyp2,
                    n = neff_hyp2,
                    group_parameters = 3,
                    joint_parameters = 0,
                    standardize = TRUE)  # was `T`; TRUE/FALSE are not reassignable

print(results_hyp3)
## Bayesian informative hypothesis testing for an object of class numeric:
## 
##    Fit    Com   BF.u   BF.c   PMPa  PMPb 
## H1 0.069  0.164 0.420  0.377  0.008 0.008
## H2 0.145  0.322 0.451  0.358  0.008 0.008
## H3 18.337 0.341 53.716 53.716 0.984 0.966
## Hu                                  0.018
## 
## Hypotheses:
##   H1: Int_on_TSM_GB<Int_on_TSM_CC<Int_on_TSM_CB
##   H2: (Int_on_TSM_GB,Int_on_TSM_CC)<Int_on_TSM_CB
##   H3: Int_on_TSM_GB=Int_on_TSM_CC=Int_on_TSM_CB
## 
## Note: BF.u denotes the Bayes factor of the hypothesis at hand versus the unconstrained hypothesis Hu. BF.c denotes the Bayes factor of the hypothesis at hand versus its complement.
results_hyp3$BFmatrix
##           H1          H2          H3
## H1   1.00000   0.9313845 0.007820809
## H2   1.07367   1.0000000 0.008396971
## H3 127.86401 119.0905634 1.000000000

9 Inference statistics (Hyp 4)

# Set up the matrices for the estimates ##############
# one row of the three TSM means per imputed data set
mulest_hyp4 <- matrix(0, nrow = M, ncol = 3)
# running average of the within-imputation covariance matrices
covwithin_hyp4 <- matrix(0, nrow = 3, ncol = 3)


# Estimate the coefficients for each data frame ######
# seq_len(M) instead of 1:M (safe if M were ever 0)
for (i in seq_len(M)) {
  # estimate the means of the three TSM variables on imputed data set i
  within_hyp4 <- lm(cbind(TSM_GB, TSM_CC, TSM_CB) ~ 1,
                    data = mice::complete(out, i))
  mulest_hyp4[i, ] <- coef(within_hyp4)[1:3]  # store these means
  covwithin_hyp4 <- covwithin_hyp4 +
    1/M * vcov(within_hyp4)[1:3, 1:3]         # average within covariance
}


# Compute the average of the estimates ###############
# pool the per-imputation means and covariances (Rubin-style combination)
estimates_hyp4 <- colMeans(mulest_hyp4)
names(estimates_hyp4) <- c("TSM_GB", "TSM_CC", "TSM_CB")
covbetween_hyp4 <- cov(mulest_hyp4)  # between-imputation covariance matrix
covariance_hyp4 <- covwithin_hyp4 + (1 + 1/M) * covbetween_hyp4  # total variance


# Determine the effective and real sample sizes ######
samp_hyp4 <- nrow(data_scales_wide)  # real sample size
nucom_hyp4 <- samp_hyp4 - length(estimates_hyp4)

# corresponds to the numbered equations in Hoijtink, Gu, Mulder, & Rosseel (2019):
lam_hyp4 <- (1 + 1/M) * (1/length(estimates_hyp4)) *
  sum(diag(covbetween_hyp4 %*%
             MASS::ginv(covariance_hyp4)))  # (43) avg. fraction of missing information
nuold_hyp4 <- (M - 1)/(lam_hyp4^2)  # (44)
nuobs_hyp4 <- (nucom_hyp4 + 1)/(nucom_hyp4 + 3) * nucom_hyp4 * (1 - lam_hyp4)  # (46)
nu_hyp4 <- nuold_hyp4 * nuobs_hyp4/(nuold_hyp4 + nuobs_hyp4)  # (47)
fracmis_hyp4 <- (nu_hyp4 + 1)/(nu_hyp4 + 3) * lam_hyp4 + 2/(nu_hyp4 + 3)  # (48)
# effective sample size after discounting for missingness
# NOTE(review): the original comment read "= 172 approx. 2/3 * 270", but the
# data have 250 rows (cf. skim output above) — confirm the intended N
neff_hyp4 <- samp_hyp4 - samp_hyp4 * fracmis_hyp4

# coerce `covariance` to a list, as bain expects a list of covariance matrices
covariance_hyp4 <- list(covariance_hyp4)


# Test the hypotheses with bain ######################
set.seed(6346)
results_hyp4 <- bain(estimates_hyp4,
               "TSM_GB>TSM_CC>TSM_CB;
                TSM_GB=TSM_CC=TSM_CB;
                (TSM_GB,TSM_CC)>TSM_CB",
                n = neff_hyp4, Sigma=covariance_hyp4,    
                group_parameters=3, joint_parameters = 0)

print(results_hyp4)
## Bayesian informative hypothesis testing for an object of class numeric:
## 
##    Fit   Com   BF.u  BF.c   PMPa  PMPb 
## H1 0.742 0.161 4.621 15.055 0.648 0.568
## H2 0.156 0.755 0.206 0.206  0.029 0.025
## H3 0.751 0.325 2.308 6.252  0.323 0.284
## Hu                                0.123
## 
## Hypotheses:
##   H1: TSM_GB>TSM_CC>TSM_CB
##   H2: TSM_GB=TSM_CC=TSM_CB
##   H3: (TSM_GB,TSM_CC)>TSM_CB
## 
## Note: BF.u denotes the Bayes factor of the hypothesis at hand versus the unconstrained hypothesis Hu. BF.c denotes the Bayes factor of the hypothesis at hand versus its complement.
results_hyp4$BFmatrix
##            H1       H2         H3
## H1 1.00000000 22.41071 2.00252396
## H2 0.04462152  1.00000 0.08935566
## H3 0.49936981 11.19123 1.00000000